In [ ]:
# General
import argparse
import os
import random

# torch imports
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils

# maths
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
from IPython.display import HTML
%matplotlib inline

manualSeed = 999
print('Random seed: ', manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
Random seed:  999
Out[ ]:
<torch._C.Generator at 0x7f7734cea2f0>

Setting up parameters for the project (based on DCGAN paper).

In [ ]:
# Root directory for dataset
dataroot = 'data'

# Number of workers for dataloader
workers = 2

# Batch size during training
batch_size = 128

# Spatial size of training images.
# All images will be resized to this using a transformer.
image_size = 64

# Number of channels in the training images. For colored ones, this is 3.
n_channels = 3

# Size of z latent vector (size of generator input)
n_z = 100

# Size of feature maps in generator
n_generator_feature = 64

# Size of feature maps in discriminator
n_discriminator_feature = 64

# Number of training epochs
num_epochs = 100

# Learning rate for optimizers
lr = 0.0002

# Beta1 hyperparam for Adam optimizers
beta1 = 0.5

# Number of GPUs available (0 for cpu).
ngpu = 1

Download the celebrity dataset.

In [ ]:
# Check if dataset is already download
! du -sh data/celeba
du: cannot access 'data/celeba': No such file or directory
In [ ]:
! pip install gdown
import gdown

# url = 'https://drive.google.com/uc?id=0B7EVK8r0v71pZjFTYXZWM3FlRnM'
# Podaci preuzeti sa repozitorijuma: https://github.com/Mckinsey666/Anime-Face-Dataset
url = 'https://drive.google.com/uc?id=1jdJXkQIWVGOeb0XJIXE3YuZQeiEPd8rM&export=download'
output = 'data.zip'
gdown.download(url, output, quiet=False)
Requirement already satisfied: gdown in /usr/local/lib/python3.6/dist-packages (3.6.4)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gdown) (1.12.0)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from gdown) (4.28.1)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from gdown) (2.21.0)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (1.24.3)
Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2.8)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2019.9.11)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (3.0.4)
Downloading...
From: https://drive.google.com/uc?id=1jdJXkQIWVGOeb0XJIXE3YuZQeiEPd8rM&export=download
To: /content/data.zip
231MB [00:01, 192MB/s]
Out[ ]:
'data.zip'
In [ ]:
%%capture
! unzip data.zip
! mkdir data/anime
! mv data/* data/anime
In [ ]:
! ls data
# ! du -sh data/celeba
anime

Working with dataset

This is an important step because we will be using the ImageFolder dataset class, which requires there to be subdirectories in the dataset’s root folder. Now, we can create the dataset, create the dataloader, set the device to run on, and finally visualize some of the training data.

In [ ]:
dataset = dset.ImageFolder(
    root=dataroot,
    transform=transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))                              
    ])
)

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=workers
)

# Decide on which device to perform calculations
device = torch.device('cuda:0' if (torch.cuda.is_available() and ngpu > 0) else 'cpu')
In [ ]:
# Plot some images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8,8))
plt.axis('off')
plt.title('Training images')
plt.imshow(
    np.transpose(
        vutils.make_grid(
            real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(),
            (1, 2, 0)
    )
)
Out[ ]:
<matplotlib.image.AxesImage at 0x7f77331d7f28>

Weight initialization

From the DCGAN paper, the authors specify that all model weights shall be randomly initialized from a Normal distribution with mean=0, stdev=0.02.

The weights_init function takes an initialized model as input and reinitializes all convolutional, convolutional-transpose, and batch normalization layers to meet this criteria. This function is applied to the models immediately after initialization.

In [ ]:
# custom weights initialization called on netG and netD
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

Generator

The generator, $G$, is designed to map the latent space vector ($z$) to data-space. Since our data are images, converting $z$ to data-space means ultimately creating a RGB image with the same size as the training images ($3\times64\times64$).

In practice, this is accomplished through a series of stride 2 convolutional transpose layers, each paired with a 2d batch norm layer and relu activation.

The output of the generator is fed through a tanh function to return it to the input data range of $[-1, 1]$.

It is worth noting the existance of the batch norm functions after the conv-transpose layers, as this is a critical contribution of the DCGAN paper.

These layers help with the flow of gradients during training. An image from the DCGAN paper is shown bellow.

alt text

Notice, the how the inputs we set in the input section (nz, ngf, and nc) influence the generator architecture in code. nz is the length of the z input vector, ngf relates to the size of the feature maps that are propagated through the generator, and nc is the number of channels in the output image (set to 3 for RGB images). Below is the code for the generator.

In [ ]:
class Generator(nn.Module):
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input iz Z, going into convolution
            nn.ConvTranspose2d(in_channels=n_z, out_channels=n_generator_feature * 8, kernel_size=4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(n_generator_feature * 8),
            nn.ReLU(inplace=True),

            # state size. (n_generator_feature * 8) x 4 x 4
            nn.ConvTranspose2d(n_generator_feature * 8, n_generator_feature * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(n_generator_feature * 4),
            nn.ReLU(inplace=True),

            # state size. (n_generator_feature * 8) x 8 x 8
            nn.ConvTranspose2d(n_generator_feature * 4, n_generator_feature * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(n_generator_feature * 2),
            nn.ReLU(inplace=True),

            # state size. (n_generator_feature * 2) x 16 x 16
            nn.ConvTranspose2d(n_generator_feature * 2, n_generator_feature, 4, 2, 1, bias=False),
            nn.BatchNorm2d(n_generator_feature),
            nn.ReLU(inplace=True),

            # state size. (n_generator_feature) x 32 x 32
            nn.ConvTranspose2d(n_generator_feature, n_channels, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (n_channels) x 64 x 64
        )

    def forward(self, input):
        return self.main(input)
In [ ]:
# Create the generator
netG = Generator(ngpu).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 0):
    netG = nn.DataParallel(netG, list(range(ngpu)))

# Apply the weights_init function to randomly initialize all weights
#  to mean=0, stdev=0.2.
netG.apply(weights_init)

# Show the model
print(netG)
DataParallel(
  (module): Generator(
    (main): Sequential(
      (0): ConvTranspose2d(100, 512, kernel_size=(4, 4), stride=(1, 1), bias=False)
      (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
      (3): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
      (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (5): ReLU(inplace=True)
      (6): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
      (7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (8): ReLU(inplace=True)
      (9): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
      (10): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (11): ReLU(inplace=True)
      (12): ConvTranspose2d(64, 3, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
      (13): Tanh()
    )
  )
)

Discriminator

As mentioned, the discriminator, $D$, is a binary classification network that takes an image as input and outputs a scalar probability that the input image is real (as opposed to fake). Here, D takes a $3\times64\times64$ input image, processes it through a series of Conv2d, BatchNorm2d, and LeakyReLU layers, and outputs the final probability through a Sigmoid activation function. This architecture can be extended with more layers if necessary for the problem, but there is significance to the use of the strided convolution, BatchNorm, and LeakyReLUs.

The DCGAN paper mentions it is a good practice to use strided convolution rather than pooling to downsample because it lets the network learn its own pooling function. Also batch norm and leaky relu functions promote healthy gradient flow which is critical for the learning process of both G and D.

In [ ]:
class Discriminator(nn.Module):
    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (n_channels) x 64 x 64
            nn.Conv2d(n_channels, n_discriminator_feature, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (n_discriminator_feature) x 32 x 32
            nn.Conv2d(n_discriminator_feature, n_discriminator_feature * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(n_discriminator_feature * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (n_discriminator_feature*2) x 16 x 16
            nn.Conv2d(n_discriminator_feature * 2, n_discriminator_feature * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(n_discriminator_feature * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (n_discriminator_feature*4) x 8 x 8
            nn.Conv2d(n_discriminator_feature * 4, n_discriminator_feature * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(n_discriminator_feature * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (n_discriminator_feature*8) x 4 x 4
            nn.Conv2d(n_discriminator_feature * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, input):
        return self.main(input)
In [ ]:
# Create the Discriminator
netD = Discriminator(ngpu).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    netD = nn.DataParallel(netD, list(range(ngpu)))

# Apply the weights_init function to randomly initialize all weights
#  to mean=0, stdev=0.2.
netD.apply(weights_init)

# Print the model
print(netD)
Discriminator(
  (main): Sequential(
    (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
    (2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (4): LeakyReLU(negative_slope=0.2, inplace=True)
    (5): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (6): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (7): LeakyReLU(negative_slope=0.2, inplace=True)
    (8): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (9): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (10): LeakyReLU(negative_slope=0.2, inplace=True)
    (11): Conv2d(512, 1, kernel_size=(4, 4), stride=(1, 1), bias=False)
    (12): Sigmoid()
  )
)

Loss Functions and Optimizers

With $D$ and $G$ setup, we can specify how they learn through the loss functions and optimizers. We will use the Binary Cross Entropy loss (BCELoss) function which is defined in PyTorch as: $$ \ell(x, y) = L = (l_1, \ldots, l_N)^T, l_n = -[y_N \cdot log x_n + (1-y_N) \cdot log(1 - x_n)] $$

Notice how this function provides the calculation of both log components in the objective function ($log(D(x))$ and $log(1-D(G(z))$). We can specify what part of the BCE equation to use with the $y$ input. This is accomplished in the training loop which is coming up soon, but it is important to understand how we can choose which component we wish to calculate just by changing $y$.

Next, we define our real label as 1 and the fake label as 0. These labels will be used when calculating the losses of $D$ and $G$, and this is also the convention used in the original GAN paper. Finally, we set up two separate optimizers, one for $D$ and one for $G$. As specified in the original DCGAN paper, both are Adam optimizers with learning rate 0.0002 and Beta1 = 0.5. For keeping track of the generator's learning progression, we will generatoe a fixed batch of latent vectors that are drawn from a Gaussian distribution. In the training loop, we will periodically input this fixed noise into $G$, and over the iterations we will see images form out of the noise.

In [ ]:
# Initialize the BCELoss function
criterion = nn.BCELoss()

# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(64, n_z, 1, 1, device=device)

# Establish convention for real and fake labels during training
real_label = 1
fake_label = 0

# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))

Training

Finally, now that we have all of the parts of the GAN framework defined, we can train it. Be mindful that training GANs is somewhat of an art form, as incorrect hyperparameter settings lead to mode collapse with little explanation of what went wrong. Here, we will closely follow Algorithm 1 from Goodfellow’s paper, while abiding by some of the best practices shown in ganhacks. Namely, we will “construct different mini-batches for real and fake” images, and also adjust $G$’s objective function to maximize $log D(G(z))$. Training is split up into two main parts. Part 1 updates the Discriminator and Part 2 updates the Generator.

Part 1 - Train the Discriminator

Recall, the goal of training the discriminator is to maximize the probability of correctly classifying a given input as real or fake. In terms of Goodfellow, we wish to “update the discriminator by ascending its stochastic gradient”. Practically, we want to maximize $log(D(x))+log(1−D(G(z)))$. Due to the separate mini-batch suggestion from ganhacks, we will calculate this in two steps. First, we will construct a batch of real samples from the training set, forward pass through $D$, calculate the loss $(log(D(x)))$, then calculate the gradients in a backward pass. Secondly, we will construct a batch of fake samples with the current generator, forward pass this batch through $D$, calculate the loss $(log(1−D(G(z))))$, and accumulate the gradients with a backward pass. Now, with the gradients accumulated from both the all-real and all-fake batches, we call a step of the Discriminator’s optimizer.

Part 2- Train the Generator

As stated in the original paper, we want to train the Generator by minimizing $log(1−D(G(z)))$ in an effort to generate better fakes.

As mentioned, this was shown by Goodfellow to not provide sufficient gradients, especially early in the learning process. As a fix, we instead wish to maximize $log(D(G(z)))$. In the code we accomplish this by: classifying the Generator output from Part 1 with the Discriminator, computing $G$’s loss using real labels as ground-truth, computing $G$’s gradients in a backward pass, and finally updating $G$’s parameters with an optimizer step. It may seem counter-intuitive to use the real labels as GT labels for the loss function, but this allows us to use the $log(x)$ part of the BCELoss (rather than the log(1−x) part) which is exactly what we want.

Finally, we will do some statistic reporting and at the end of each epoch we will push our fixed_noise batch through the generator to visually track the progress of G’s training. The training statistics reported are:

  • Loss_D - discriminator loss calculated as the sum of losses for the all real and all fake batches ($log(D(x))+log(D(G(z)))$).
  • Loss_G - generator loss calculated as $log(D(G(z)))$
  • D(x) - the average output (across the batch) of the discriminator for the all real batch. This should start close to 1 then theoretically converge to 0.5 when G gets better. Think about why this is.
  • D(G(z)) - average discriminator outputs for the all fake batch. The first number is before D is updated and the second number is after D is updated. These numbers should start near 0 and converge to 0.5 as G gets better. Think about why this is.
In [ ]:
# Training loop

# Lists that help keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0

print('Starting training loop...')

for epoch in range(num_epochs):
    # For each batch from the dataloader
    for i, data in enumerate(dataloader, 0):

        # ----------------------------------------------------------------------
        # (1) Update D: maximize log(D(x)) + log(1 - D(g(z)))
        # ----------------------------------------------------------------------
        netD.zero_grad()

        ## --------------------------
        ## Train with all-real batch
        ## --------------------------
        # format batch
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full((b_size,), real_label, device=device)

        # forward pass real batch through D
        output = netD(real_cpu).view(-1)
        # calculate loss on all-real batch
        errD_real = criterion(output, label)
        # calculate gradients for D in backward pass
        errD_real.backward()
        D_x = output.mean().item()

        ## --------------------------
        ## Train with all-fake batch
        ## --------------------------
        noise = torch.randn(b_size, n_z, 1, 1, device=device)
        # Generate fake image batch with G
        fake = netG(noise)
        label.fill_(fake_label)
        # Classify all fake batch with D (TODO: what is `detach`?)
        output = netD(fake.detach()).view(-1)
        # Calculate D's loss on the all-fake batch
        errD_fake = criterion(output, label)
        # Calculate gradients for this batch
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        # Add the errors from all-real and all-fake batches together
        errD = errD_real + errD_fake

        # Update D
        optimizerD.step()

        # ----------------------------------------------------------------------
        # (2) Update G: maximize log(D(G(z)))
        # ----------------------------------------------------------------------
        netG.zero_grad()
        # fake labels are real for generator cost
        label.fill_(real_label)
        # We just update D, perform a new pass of the fake batch through D
        output = netD(fake).view(-1)
        # Calculate G's loss based on this output
        errG = criterion(output, label)
        # Calculate gradients for G
        errG.backward()
        D_G_z2 = output.mean().item()
        # Update G
        optimizerG.step()

        # Log training
        if i % 50 == 0:
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, i, len(dataloader),
                     errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))

        # Save losses for plotting later
        G_losses.append(errG.item())
        D_losses.append(errD.item())

        # Check how the generator is doing by saving G's output on fixed noise
        if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
            with torch.no_grad():
                fake = netG(fixed_noise).detach().cpu()
            img_list.append(vutils.make_grid(fake, padding=2, normalize=True))

        iters += 1
Starting training loop...
[0/100][0/169]	Loss_D: 1.6886	Loss_G: 2.0477	D(x): 0.3502	D(G(z)): 0.2659 / 0.1726
[0/100][50/169]	Loss_D: 3.0689	Loss_G: 13.7757	D(x): 0.2639	D(G(z)): 0.0000 / 0.0000
[0/100][100/169]	Loss_D: 0.4274	Loss_G: 8.0631	D(x): 0.8269	D(G(z)): 0.0458 / 0.0008
[0/100][150/169]	Loss_D: 0.4088	Loss_G: 4.1067	D(x): 0.8201	D(G(z)): 0.0953 / 0.0279
[1/100][0/169]	Loss_D: 0.6274	Loss_G: 4.8375	D(x): 0.7958	D(G(z)): 0.1227 / 0.0141
[1/100][50/169]	Loss_D: 1.4515	Loss_G: 8.8742	D(x): 0.8973	D(G(z)): 0.6417 / 0.0004
[1/100][100/169]	Loss_D: 1.0669	Loss_G: 9.2235	D(x): 0.9280	D(G(z)): 0.5437 / 0.0006
[1/100][150/169]	Loss_D: 0.7539	Loss_G: 8.1596	D(x): 0.9035	D(G(z)): 0.4189 / 0.0006
[2/100][0/169]	Loss_D: 0.2141	Loss_G: 3.5928	D(x): 0.8793	D(G(z)): 0.0494 / 0.0514
[2/100][50/169]	Loss_D: 0.4177	Loss_G: 3.8907	D(x): 0.9355	D(G(z)): 0.2609 / 0.0324
[2/100][100/169]	Loss_D: 1.2397	Loss_G: 9.6235	D(x): 0.8598	D(G(z)): 0.5883 / 0.0001
[2/100][150/169]	Loss_D: 0.5036	Loss_G: 6.5380	D(x): 0.8880	D(G(z)): 0.2542 / 0.0033
[3/100][0/169]	Loss_D: 0.8315	Loss_G: 4.1315	D(x): 0.5803	D(G(z)): 0.0438 / 0.0261
[3/100][50/169]	Loss_D: 0.3172	Loss_G: 3.7959	D(x): 0.7970	D(G(z)): 0.0265 / 0.0454
[3/100][100/169]	Loss_D: 1.4437	Loss_G: 8.6938	D(x): 0.9704	D(G(z)): 0.6821 / 0.0005
[3/100][150/169]	Loss_D: 0.3822	Loss_G: 4.6758	D(x): 0.9122	D(G(z)): 0.2160 / 0.0162
[4/100][0/169]	Loss_D: 0.4191	Loss_G: 5.5850	D(x): 0.8309	D(G(z)): 0.1681 / 0.0057
[4/100][50/169]	Loss_D: 0.3044	Loss_G: 3.3362	D(x): 0.8211	D(G(z)): 0.0387 / 0.0636
[4/100][100/169]	Loss_D: 0.4289	Loss_G: 2.8292	D(x): 0.7626	D(G(z)): 0.0745 / 0.0857
[4/100][150/169]	Loss_D: 0.5065	Loss_G: 4.6679	D(x): 0.7007	D(G(z)): 0.0384 / 0.0163
[5/100][0/169]	Loss_D: 0.3965	Loss_G: 4.3955	D(x): 0.7601	D(G(z)): 0.0057 / 0.0238
[5/100][50/169]	Loss_D: 0.3727	Loss_G: 3.1495	D(x): 0.8383	D(G(z)): 0.1024 / 0.0733
[5/100][100/169]	Loss_D: 0.6160	Loss_G: 5.8048	D(x): 0.6386	D(G(z)): 0.0072 / 0.0045
[5/100][150/169]	Loss_D: 0.5382	Loss_G: 4.5008	D(x): 0.7410	D(G(z)): 0.1098 / 0.0218
[6/100][0/169]	Loss_D: 0.3886	Loss_G: 3.9161	D(x): 0.7484	D(G(z)): 0.0280 / 0.0315
[6/100][50/169]	Loss_D: 0.4086	Loss_G: 5.2924	D(x): 0.7498	D(G(z)): 0.0377 / 0.0110
[6/100][100/169]	Loss_D: 0.7245	Loss_G: 6.6505	D(x): 0.5987	D(G(z)): 0.0082 / 0.0038
[6/100][150/169]	Loss_D: 0.8344	Loss_G: 9.2418	D(x): 0.9724	D(G(z)): 0.5004 / 0.0004
[7/100][0/169]	Loss_D: 0.2207	Loss_G: 2.8529	D(x): 0.8854	D(G(z)): 0.0713 / 0.0837
[7/100][50/169]	Loss_D: 0.4642	Loss_G: 4.7408	D(x): 0.7205	D(G(z)): 0.0206 / 0.0139
[7/100][100/169]	Loss_D: 0.3166	Loss_G: 5.8089	D(x): 0.8708	D(G(z)): 0.1294 / 0.0054
[7/100][150/169]	Loss_D: 0.3912	Loss_G: 4.8157	D(x): 0.7758	D(G(z)): 0.0571 / 0.0134
[8/100][0/169]	Loss_D: 0.2119	Loss_G: 3.8395	D(x): 0.9108	D(G(z)): 0.0705 / 0.0415
[8/100][50/169]	Loss_D: 0.2961	Loss_G: 5.9895	D(x): 0.9836	D(G(z)): 0.2173 / 0.0045
[8/100][100/169]	Loss_D: 0.2303	Loss_G: 5.6902	D(x): 0.9215	D(G(z)): 0.1162 / 0.0049
[8/100][150/169]	Loss_D: 0.3705	Loss_G: 7.3056	D(x): 0.7563	D(G(z)): 0.0009 / 0.0057
[9/100][0/169]	Loss_D: 0.3374	Loss_G: 5.5456	D(x): 0.8899	D(G(z)): 0.1519 / 0.0079
[9/100][50/169]	Loss_D: 0.2532	Loss_G: 5.1744	D(x): 0.9009	D(G(z)): 0.1152 / 0.0092
[9/100][100/169]	Loss_D: 0.2379	Loss_G: 6.8116	D(x): 0.9569	D(G(z)): 0.1595 / 0.0022
[9/100][150/169]	Loss_D: 0.1742	Loss_G: 6.0191	D(x): 0.9128	D(G(z)): 0.0447 / 0.0052
[10/100][0/169]	Loss_D: 1.2699	Loss_G: 7.6456	D(x): 0.4159	D(G(z)): 0.0001 / 0.0008
[10/100][50/169]	Loss_D: 0.2088	Loss_G: 5.6898	D(x): 0.9007	D(G(z)): 0.0601 / 0.0054
[10/100][100/169]	Loss_D: 0.2460	Loss_G: 5.7445	D(x): 0.8317	D(G(z)): 0.0121 / 0.0054
[10/100][150/169]	Loss_D: 0.2522	Loss_G: 5.5821	D(x): 0.8750	D(G(z)): 0.0587 / 0.0071
[11/100][0/169]	Loss_D: 0.6916	Loss_G: 5.6097	D(x): 0.6445	D(G(z)): 0.0017 / 0.0072
[11/100][50/169]	Loss_D: 0.0658	Loss_G: 6.3250	D(x): 0.9620	D(G(z)): 0.0233 / 0.0032
[11/100][100/169]	Loss_D: 0.4864	Loss_G: 8.3929	D(x): 0.7066	D(G(z)): 0.0017 / 0.0008
[11/100][150/169]	Loss_D: 0.2236	Loss_G: 5.3426	D(x): 0.8980	D(G(z)): 0.0797 / 0.0092
[12/100][0/169]	Loss_D: 0.1159	Loss_G: 7.1035	D(x): 0.9633	D(G(z)): 0.0641 / 0.0020
[12/100][50/169]	Loss_D: 0.4185	Loss_G: 8.1586	D(x): 0.9466	D(G(z)): 0.2615 / 0.0006
[12/100][100/169]	Loss_D: 0.3488	Loss_G: 7.8809	D(x): 0.9586	D(G(z)): 0.2235 / 0.0008
[12/100][150/169]	Loss_D: 0.4281	Loss_G: 8.6445	D(x): 0.9538	D(G(z)): 0.2614 / 0.0004
[13/100][0/169]	Loss_D: 0.0435	Loss_G: 2.7508	D(x): 0.9960	D(G(z)): 0.0366 / 0.1329
[13/100][50/169]	Loss_D: 0.3982	Loss_G: 7.1999	D(x): 0.8719	D(G(z)): 0.1838 / 0.0013
[13/100][100/169]	Loss_D: 0.2503	Loss_G: 6.3538	D(x): 0.9097	D(G(z)): 0.1232 / 0.0031
[13/100][150/169]	Loss_D: 0.2894	Loss_G: 4.2860	D(x): 0.8120	D(G(z)): 0.0197 / 0.0240
[14/100][0/169]	Loss_D: 0.6974	Loss_G: 10.5713	D(x): 0.9540	D(G(z)): 0.3966 / 0.0001
[14/100][50/169]	Loss_D: 0.2229	Loss_G: 7.8880	D(x): 0.8533	D(G(z)): 0.0034 / 0.0010
[14/100][100/169]	Loss_D: 0.2160	Loss_G: 4.8785	D(x): 0.9368	D(G(z)): 0.1222 / 0.0119
[14/100][150/169]	Loss_D: 0.2090	Loss_G: 5.6448	D(x): 0.9880	D(G(z)): 0.1554 / 0.0083
[15/100][0/169]	Loss_D: 0.4765	Loss_G: 8.7479	D(x): 0.9671	D(G(z)): 0.3009 / 0.0005
[15/100][50/169]	Loss_D: 0.3382	Loss_G: 6.8702	D(x): 0.9387	D(G(z)): 0.2048 / 0.0027
[15/100][100/169]	Loss_D: 0.2026	Loss_G: 5.5892	D(x): 0.8833	D(G(z)): 0.0301 / 0.0090
[15/100][150/169]	Loss_D: 0.2513	Loss_G: 5.1948	D(x): 0.8390	D(G(z)): 0.0243 / 0.0106
[16/100][0/169]	Loss_D: 0.2206	Loss_G: 3.7748	D(x): 0.9891	D(G(z)): 0.1663 / 0.0512
[16/100][50/169]	Loss_D: 0.5516	Loss_G: 2.3858	D(x): 0.6827	D(G(z)): 0.0327 / 0.1397
[16/100][100/169]	Loss_D: 0.3780	Loss_G: 5.3595	D(x): 0.7540	D(G(z)): 0.0096 / 0.0109
[16/100][150/169]	Loss_D: 0.2175	Loss_G: 3.4724	D(x): 0.8716	D(G(z)): 0.0515 / 0.0594
[17/100][0/169]	Loss_D: 0.2377	Loss_G: 4.2305	D(x): 0.8822	D(G(z)): 0.0774 / 0.0278
[17/100][50/169]	Loss_D: 1.0107	Loss_G: 4.6681	D(x): 0.4974	D(G(z)): 0.0004 / 0.0254
[17/100][100/169]	Loss_D: 0.3539	Loss_G: 4.5377	D(x): 0.8375	D(G(z)): 0.1113 / 0.0203
[17/100][150/169]	Loss_D: 0.3121	Loss_G: 5.5299	D(x): 0.9777	D(G(z)): 0.2104 / 0.0091
[18/100][0/169]	Loss_D: 0.3494	Loss_G: 2.9686	D(x): 0.7942	D(G(z)): 0.0595 / 0.0883
[18/100][50/169]	Loss_D: 0.3402	Loss_G: 7.0350	D(x): 0.9828	D(G(z)): 0.2196 / 0.0034
[18/100][100/169]	Loss_D: 0.5579	Loss_G: 2.7214	D(x): 0.6919	D(G(z)): 0.0515 / 0.1131
[18/100][150/169]	Loss_D: 0.1823	Loss_G: 4.3605	D(x): 0.8889	D(G(z)): 0.0363 / 0.0325
[19/100][0/169]	Loss_D: 0.2286	Loss_G: 3.9951	D(x): 0.8633	D(G(z)): 0.0506 / 0.0331
[19/100][50/169]	Loss_D: 0.6775	Loss_G: 8.4842	D(x): 0.9963	D(G(z)): 0.4113 / 0.0006
[19/100][100/169]	Loss_D: 0.2843	Loss_G: 4.5088	D(x): 0.8705	D(G(z)): 0.1112 / 0.0186
[19/100][150/169]	Loss_D: 0.4771	Loss_G: 2.7656	D(x): 0.7145	D(G(z)): 0.0487 / 0.1040
[20/100][0/169]	Loss_D: 0.5391	Loss_G: 8.3019	D(x): 0.9501	D(G(z)): 0.3237 / 0.0007
[20/100][50/169]	Loss_D: 0.2033	Loss_G: 5.6174	D(x): 0.9603	D(G(z)): 0.1295 / 0.0081
[20/100][100/169]	Loss_D: 1.5641	Loss_G: 6.4305	D(x): 0.3424	D(G(z)): 0.0001 / 0.0045
[20/100][150/169]	Loss_D: 0.2940	Loss_G: 4.3052	D(x): 0.8152	D(G(z)): 0.0282 / 0.0273
[21/100][0/169]	Loss_D: 0.6546	Loss_G: 1.4966	D(x): 0.6210	D(G(z)): 0.0365 / 0.3064
[21/100][50/169]	Loss_D: 0.3268	Loss_G: 5.8027	D(x): 0.7714	D(G(z)): 0.0054 / 0.0067
[21/100][100/169]	Loss_D: 0.1925	Loss_G: 5.2981	D(x): 0.8736	D(G(z)): 0.0320 / 0.0104
[21/100][150/169]	Loss_D: 0.6131	Loss_G: 8.8972	D(x): 0.9541	D(G(z)): 0.3616 / 0.0004
[22/100][0/169]	Loss_D: 0.3423	Loss_G: 6.0979	D(x): 0.9572	D(G(z)): 0.2287 / 0.0037
[22/100][50/169]	Loss_D: 0.3125	Loss_G: 6.2417	D(x): 0.9819	D(G(z)): 0.2323 / 0.0036
[22/100][100/169]	Loss_D: 0.3715	Loss_G: 7.8959	D(x): 0.9443	D(G(z)): 0.2273 / 0.0009
[22/100][150/169]	Loss_D: 0.4372	Loss_G: 3.1864	D(x): 0.7881	D(G(z)): 0.0805 / 0.1038
[23/100][0/169]	Loss_D: 0.6363	Loss_G: 6.1329	D(x): 0.8418	D(G(z)): 0.2890 / 0.0040
[23/100][50/169]	Loss_D: 0.2927	Loss_G: 4.9553	D(x): 0.8987	D(G(z)): 0.1399 / 0.0131
[23/100][100/169]	Loss_D: 0.3765	Loss_G: 6.4050	D(x): 0.9466	D(G(z)): 0.2399 / 0.0033
[23/100][150/169]	Loss_D: 0.4234	Loss_G: 6.6352	D(x): 0.9375	D(G(z)): 0.2560 / 0.0025
[24/100][0/169]	Loss_D: 0.1797	Loss_G: 4.0571	D(x): 0.8693	D(G(z)): 0.0128 / 0.0523
[24/100][50/169]	Loss_D: 0.2666	Loss_G: 4.1186	D(x): 0.9019	D(G(z)): 0.1285 / 0.0264
[24/100][100/169]	Loss_D: 0.5176	Loss_G: 3.2364	D(x): 0.6761	D(G(z)): 0.0072 / 0.0675
[24/100][150/169]	Loss_D: 0.4208	Loss_G: 4.6694	D(x): 0.8689	D(G(z)): 0.1871 / 0.0175
[25/100][0/169]	Loss_D: 0.2666	Loss_G: 3.9522	D(x): 0.9035	D(G(z)): 0.1268 / 0.0331
[25/100][50/169]	Loss_D: 1.1566	Loss_G: 11.5124	D(x): 0.9761	D(G(z)): 0.5829 / 0.0000
[25/100][100/169]	Loss_D: 0.8011	Loss_G: 10.2435	D(x): 0.9776	D(G(z)): 0.4713 / 0.0001
[25/100][150/169]	Loss_D: 0.4663	Loss_G: 3.5794	D(x): 0.7126	D(G(z)): 0.0299 / 0.0586
[26/100][0/169]	Loss_D: 0.1811	Loss_G: 3.6715	D(x): 0.9530	D(G(z)): 0.1110 / 0.0443
[26/100][50/169]	Loss_D: 0.3467	Loss_G: 4.0712	D(x): 0.8567	D(G(z)): 0.1378 / 0.0312
[26/100][100/169]	Loss_D: 0.2508	Loss_G: 5.0595	D(x): 0.9273	D(G(z)): 0.1353 / 0.0119
[26/100][150/169]	Loss_D: 0.5994	Loss_G: 1.9457	D(x): 0.6363	D(G(z)): 0.0105 / 0.2417
[27/100][0/169]	Loss_D: 0.2678	Loss_G: 6.5884	D(x): 0.8151	D(G(z)): 0.0190 / 0.0065
[27/100][50/169]	Loss_D: 0.2664	Loss_G: 5.4144	D(x): 0.9570	D(G(z)): 0.1715 / 0.0090
[27/100][100/169]	Loss_D: 0.2956	Loss_G: 5.3417	D(x): 0.9382	D(G(z)): 0.1839 / 0.0079
[27/100][150/169]	Loss_D: 0.4548	Loss_G: 7.1995	D(x): 0.9426	D(G(z)): 0.2691 / 0.0021
[28/100][0/169]	Loss_D: 0.4279	Loss_G: 5.4454	D(x): 0.9132	D(G(z)): 0.2427 / 0.0080
[28/100][50/169]	Loss_D: 0.3129	Loss_G: 4.0581	D(x): 0.8958	D(G(z)): 0.1435 / 0.0347
[28/100][100/169]	Loss_D: 0.2153	Loss_G: 3.8973	D(x): 0.8891	D(G(z)): 0.0756 / 0.0335
[28/100][150/169]	Loss_D: 0.3133	Loss_G: 6.0021	D(x): 0.9221	D(G(z)): 0.1691 / 0.0058
[29/100][0/169]	Loss_D: 0.6044	Loss_G: 8.7170	D(x): 0.9688	D(G(z)): 0.3593 / 0.0004
[29/100][50/169]	Loss_D: 0.4692	Loss_G: 6.5249	D(x): 0.9541	D(G(z)): 0.3044 / 0.0027
[29/100][100/169]	Loss_D: 0.3631	Loss_G: 6.7937	D(x): 0.9756	D(G(z)): 0.2404 / 0.0024
[29/100][150/169]	Loss_D: 0.3046	Loss_G: 4.1635	D(x): 0.8798	D(G(z)): 0.1364 / 0.0250
[30/100][0/169]	Loss_D: 0.2221	Loss_G: 4.4225	D(x): 0.9084	D(G(z)): 0.1013 / 0.0208
[30/100][50/169]	Loss_D: 0.3458	Loss_G: 6.3280	D(x): 0.9355	D(G(z)): 0.2058 / 0.0033
[30/100][100/169]	Loss_D: 0.3021	Loss_G: 4.1825	D(x): 0.8756	D(G(z)): 0.1127 / 0.0344
[30/100][150/169]	Loss_D: 0.7642	Loss_G: 7.9913	D(x): 0.9796	D(G(z)): 0.4575 / 0.0008
[31/100][0/169]	Loss_D: 0.5754	Loss_G: 8.0083	D(x): 0.9716	D(G(z)): 0.3600 / 0.0007
[31/100][50/169]	Loss_D: 0.2684	Loss_G: 3.8316	D(x): 0.8625	D(G(z)): 0.0833 / 0.0401
[31/100][100/169]	Loss_D: 2.9724	Loss_G: 2.6500	D(x): 0.1325	D(G(z)): 0.0007 / 0.1529
[31/100][150/169]	Loss_D: 0.4299	Loss_G: 2.9887	D(x): 0.7441	D(G(z)): 0.0582 / 0.0850
[32/100][0/169]	Loss_D: 0.4095	Loss_G: 4.7944	D(x): 0.9150	D(G(z)): 0.2254 / 0.0151
[32/100][50/169]	Loss_D: 0.2511	Loss_G: 3.4000	D(x): 0.8429	D(G(z)): 0.0562 / 0.0511
[32/100][100/169]	Loss_D: 0.4676	Loss_G: 2.8543	D(x): 0.8628	D(G(z)): 0.1830 / 0.1374
[32/100][150/169]	Loss_D: 0.3379	Loss_G: 3.3529	D(x): 0.8448	D(G(z)): 0.1198 / 0.0609
[33/100][0/169]	Loss_D: 0.1783	Loss_G: 4.5155	D(x): 0.9614	D(G(z)): 0.1181 / 0.0197
[33/100][50/169]	Loss_D: 0.1563	Loss_G: 4.1574	D(x): 0.9439	D(G(z)): 0.0857 / 0.0246
[33/100][100/169]	Loss_D: 0.3832	Loss_G: 6.3546	D(x): 0.9456	D(G(z)): 0.2488 / 0.0034
[33/100][150/169]	Loss_D: 0.3027	Loss_G: 4.1943	D(x): 0.8638	D(G(z)): 0.1115 / 0.0282
[34/100][0/169]	Loss_D: 0.2035	Loss_G: 5.2917	D(x): 0.8823	D(G(z)): 0.0529 / 0.0138
[34/100][50/169]	Loss_D: 0.3753	Loss_G: 3.1252	D(x): 0.8096	D(G(z)): 0.1079 / 0.0737
[34/100][100/169]	Loss_D: 0.2520	Loss_G: 2.9324	D(x): 0.8581	D(G(z)): 0.0717 / 0.0796
[34/100][150/169]	Loss_D: 2.6704	Loss_G: 0.3080	D(x): 0.1417	D(G(z)): 0.0081 / 0.7982
[35/100][0/169]	Loss_D: 0.4015	Loss_G: 4.4057	D(x): 0.8726	D(G(z)): 0.1852 / 0.0225
[35/100][50/169]	Loss_D: 0.2446	Loss_G: 3.2908	D(x): 0.8529	D(G(z)): 0.0662 / 0.0567
[35/100][100/169]	Loss_D: 0.2442	Loss_G: 3.3527	D(x): 0.8523	D(G(z)): 0.0629 / 0.0555
[35/100][150/169]	Loss_D: 0.2574	Loss_G: 4.0808	D(x): 0.9534	D(G(z)): 0.1642 / 0.0273
[36/100][0/169]	Loss_D: 0.7984	Loss_G: 8.6475	D(x): 0.9743	D(G(z)): 0.4680 / 0.0004
[36/100][50/169]	Loss_D: 0.3098	Loss_G: 4.9299	D(x): 0.9103	D(G(z)): 0.1593 / 0.0141
[36/100][100/169]	Loss_D: 0.2901	Loss_G: 3.5219	D(x): 0.7986	D(G(z)): 0.0206 / 0.0488
[36/100][150/169]	Loss_D: 0.3704	Loss_G: 4.4511	D(x): 0.8777	D(G(z)): 0.1732 / 0.0186
[37/100][0/169]	Loss_D: 1.3709	Loss_G: 0.4411	D(x): 0.3806	D(G(z)): 0.0244 / 0.7137
[37/100][50/169]	Loss_D: 0.2318	Loss_G: 4.1217	D(x): 0.9036	D(G(z)): 0.1023 / 0.0317
[37/100][100/169]	Loss_D: 0.1967	Loss_G: 3.8272	D(x): 0.9100	D(G(z)): 0.0789 / 0.0362
[37/100][150/169]	Loss_D: 0.2439	Loss_G: 3.1096	D(x): 0.8784	D(G(z)): 0.0862 / 0.0722
[38/100][0/169]	Loss_D: 0.2323	Loss_G: 3.3431	D(x): 0.8987	D(G(z)): 0.0944 / 0.0615
[38/100][50/169]	Loss_D: 0.3183	Loss_G: 2.9349	D(x): 0.8150	D(G(z)): 0.0709 / 0.0809
[38/100][100/169]	Loss_D: 0.1950	Loss_G: 4.4115	D(x): 0.9510	D(G(z)): 0.1261 / 0.0184
[38/100][150/169]	Loss_D: 0.2922	Loss_G: 3.5300	D(x): 0.8714	D(G(z)): 0.1006 / 0.0490
[39/100][0/169]	Loss_D: 0.2488	Loss_G: 5.3183	D(x): 0.9738	D(G(z)): 0.1786 / 0.0080
[39/100][50/169]	Loss_D: 0.3614	Loss_G: 4.2107	D(x): 0.8281	D(G(z)): 0.1175 / 0.0275
[39/100][100/169]	Loss_D: 0.2612	Loss_G: 3.7284	D(x): 0.8262	D(G(z)): 0.0457 / 0.0469
[39/100][150/169]	Loss_D: 0.2365	Loss_G: 3.6699	D(x): 0.9050	D(G(z)): 0.1057 / 0.0414
[40/100][0/169]	Loss_D: 0.1524	Loss_G: 4.4152	D(x): 0.9010	D(G(z)): 0.0346 / 0.0238
[40/100][50/169]	Loss_D: 0.1933	Loss_G: 4.2249	D(x): 0.8548	D(G(z)): 0.0182 / 0.0280
[40/100][100/169]	Loss_D: 1.8717	Loss_G: 3.0545	D(x): 0.2407	D(G(z)): 0.0027 / 0.1053
[40/100][150/169]	Loss_D: 0.3701	Loss_G: 5.9184	D(x): 0.9662	D(G(z)): 0.2552 / 0.0045
[41/100][0/169]	Loss_D: 0.2003	Loss_G: 3.6155	D(x): 0.8839	D(G(z)): 0.0618 / 0.0504
[41/100][50/169]	Loss_D: 0.2264	Loss_G: 4.3175	D(x): 0.9311	D(G(z)): 0.1287 / 0.0214
[41/100][100/169]	Loss_D: 0.4321	Loss_G: 3.7392	D(x): 0.7058	D(G(z)): 0.0113 / 0.0432
[41/100][150/169]	Loss_D: 0.2327	Loss_G: 4.4422	D(x): 0.8635	D(G(z)): 0.0591 / 0.0239
[42/100][0/169]	Loss_D: 0.2650	Loss_G: 3.9069	D(x): 0.9127	D(G(z)): 0.1371 / 0.0329
[42/100][50/169]	Loss_D: 0.5007	Loss_G: 2.8017	D(x): 0.6707	D(G(z)): 0.0098 / 0.0957
[42/100][100/169]	Loss_D: 0.2235	Loss_G: 3.9909	D(x): 0.9691	D(G(z)): 0.1553 / 0.0375
[42/100][150/169]	Loss_D: 0.1668	Loss_G: 4.0619	D(x): 0.9004	D(G(z)): 0.0502 / 0.0273
[43/100][0/169]	Loss_D: 0.2363	Loss_G: 2.6813	D(x): 0.8342	D(G(z)): 0.0357 / 0.1050
[43/100][50/169]	Loss_D: 0.3010	Loss_G: 2.3459	D(x): 0.8093	D(G(z)): 0.0593 / 0.1362
[43/100][100/169]	Loss_D: 1.6561	Loss_G: 2.2546	D(x): 0.3412	D(G(z)): 0.0158 / 0.2550
[43/100][150/169]	Loss_D: 0.4018	Loss_G: 4.7840	D(x): 0.9275	D(G(z)): 0.2451 / 0.0126
[44/100][0/169]	Loss_D: 0.2410	Loss_G: 3.1185	D(x): 0.8428	D(G(z)): 0.0505 / 0.0679
[44/100][50/169]	Loss_D: 0.2384	Loss_G: 2.9868	D(x): 0.8589	D(G(z)): 0.0647 / 0.0768
[44/100][100/169]	Loss_D: 0.3440	Loss_G: 5.5007	D(x): 0.9855	D(G(z)): 0.2558 / 0.0062
[44/100][150/169]	Loss_D: 0.1961	Loss_G: 4.4909	D(x): 0.9734	D(G(z)): 0.1407 / 0.0173
[45/100][0/169]	Loss_D: 2.5049	Loss_G: 9.9598	D(x): 0.9608	D(G(z)): 0.8262 / 0.0002
[45/100][50/169]	Loss_D: 0.2740	Loss_G: 3.2615	D(x): 0.8776	D(G(z)): 0.1089 / 0.0647
[45/100][100/169]	Loss_D: 0.2712	Loss_G: 4.0762	D(x): 0.9347	D(G(z)): 0.1677 / 0.0249
[45/100][150/169]	Loss_D: 0.2287	Loss_G: 4.1636	D(x): 0.9366	D(G(z)): 0.1348 / 0.0235
[46/100][0/169]	Loss_D: 1.1658	Loss_G: 10.7726	D(x): 0.9883	D(G(z)): 0.5745 / 0.0000
[46/100][50/169]	Loss_D: 0.2198	Loss_G: 3.7012	D(x): 0.9048	D(G(z)): 0.0965 / 0.0398
[46/100][100/169]	Loss_D: 0.2286	Loss_G: 3.0148	D(x): 0.8792	D(G(z)): 0.0715 / 0.0743
[46/100][150/169]	Loss_D: 0.3864	Loss_G: 2.0260	D(x): 0.7552	D(G(z)): 0.0467 / 0.1902
[47/100][0/169]	Loss_D: 0.2439	Loss_G: 4.6623	D(x): 0.9309	D(G(z)): 0.1428 / 0.0156
[47/100][50/169]	Loss_D: 0.2594	Loss_G: 4.0035	D(x): 0.9904	D(G(z)): 0.1964 / 0.0288
[47/100][100/169]	Loss_D: 1.1647	Loss_G: 4.5627	D(x): 0.8348	D(G(z)): 0.5351 / 0.0170
[47/100][150/169]	Loss_D: 0.3022	Loss_G: 3.6001	D(x): 0.8050	D(G(z)): 0.0497 / 0.0480
[48/100][0/169]	Loss_D: 0.7024	Loss_G: 6.9747	D(x): 0.9902	D(G(z)): 0.4391 / 0.0019
[48/100][50/169]	Loss_D: 0.2244	Loss_G: 4.0781	D(x): 0.9458	D(G(z)): 0.1444 / 0.0241
[48/100][100/169]	Loss_D: 0.1968	Loss_G: 3.9435	D(x): 0.9435	D(G(z)): 0.1179 / 0.0306
[48/100][150/169]	Loss_D: 0.7041	Loss_G: 6.4506	D(x): 0.9751	D(G(z)): 0.4060 / 0.0034
[49/100][0/169]	Loss_D: 0.2782	Loss_G: 4.9949	D(x): 0.9469	D(G(z)): 0.1757 / 0.0110
[49/100][50/169]	Loss_D: 0.2773	Loss_G: 5.6056	D(x): 0.9687	D(G(z)): 0.1959 / 0.0059
[49/100][100/169]	Loss_D: 0.3440	Loss_G: 4.6302	D(x): 0.9145	D(G(z)): 0.2018 / 0.0151
[49/100][150/169]	Loss_D: 0.2181	Loss_G: 3.7885	D(x): 0.9153	D(G(z)): 0.1104 / 0.0332
[50/100][0/169]	Loss_D: 0.2176	Loss_G: 4.0546	D(x): 0.9210	D(G(z)): 0.1129 / 0.0278
[50/100][50/169]	Loss_D: 0.1842	Loss_G: 3.6935	D(x): 0.9241	D(G(z)): 0.0847 / 0.0419
[50/100][100/169]	Loss_D: 0.2364	Loss_G: 4.5341	D(x): 0.9339	D(G(z)): 0.1396 / 0.0159
[50/100][150/169]	Loss_D: 0.1912	Loss_G: 3.5007	D(x): 0.9073	D(G(z)): 0.0746 / 0.0504
[51/100][0/169]	Loss_D: 0.1420	Loss_G: 4.3411	D(x): 0.9477	D(G(z)): 0.0785 / 0.0214
[51/100][50/169]	Loss_D: 0.1483	Loss_G: 3.6631	D(x): 0.9356	D(G(z)): 0.0733 / 0.0377
[51/100][100/169]	Loss_D: 0.1498	Loss_G: 4.3975	D(x): 0.9679	D(G(z)): 0.1019 / 0.0194
[51/100][150/169]	Loss_D: 0.7950	Loss_G: 8.1680	D(x): 0.9779	D(G(z)): 0.4567 / 0.0005
[52/100][0/169]	Loss_D: 0.3003	Loss_G: 2.9769	D(x): 0.8243	D(G(z)): 0.0737 / 0.0872
[52/100][50/169]	Loss_D: 0.2204	Loss_G: 4.3449	D(x): 0.9546	D(G(z)): 0.1432 / 0.0184
[52/100][100/169]	Loss_D: 0.3131	Loss_G: 4.9491	D(x): 0.9555	D(G(z)): 0.2060 / 0.0111
[52/100][150/169]	Loss_D: 0.1644	Loss_G: 4.1711	D(x): 0.9643	D(G(z)): 0.1128 / 0.0232
[53/100][0/169]	Loss_D: 0.1992	Loss_G: 2.8123	D(x): 0.8748	D(G(z)): 0.0513 / 0.1037
[53/100][50/169]	Loss_D: 0.3527	Loss_G: 3.9243	D(x): 0.8663	D(G(z)): 0.1538 / 0.0339
[53/100][100/169]	Loss_D: 0.2011	Loss_G: 3.8297	D(x): 0.9052	D(G(z)): 0.0843 / 0.0364
[53/100][150/169]	Loss_D: 0.2012	Loss_G: 2.9150	D(x): 0.8592	D(G(z)): 0.0352 / 0.0784
[54/100][0/169]	Loss_D: 0.1754	Loss_G: 3.4928	D(x): 0.9149	D(G(z)): 0.0726 / 0.0440
[54/100][50/169]	Loss_D: 0.8924	Loss_G: 1.9936	D(x): 0.6619	D(G(z)): 0.2492 / 0.1988
[54/100][100/169]	Loss_D: 0.2373	Loss_G: 4.2147	D(x): 0.9436	D(G(z)): 0.1487 / 0.0226
[54/100][150/169]	Loss_D: 0.1887	Loss_G: 3.4221	D(x): 0.8921	D(G(z)): 0.0617 / 0.0506
[55/100][0/169]	Loss_D: 0.8188	Loss_G: 0.5874	D(x): 0.5220	D(G(z)): 0.0042 / 0.6417
[55/100][50/169]	Loss_D: 0.2162	Loss_G: 3.4614	D(x): 0.8744	D(G(z)): 0.0632 / 0.0530
[55/100][100/169]	Loss_D: 0.2739	Loss_G: 3.6360	D(x): 0.8166	D(G(z)): 0.0395 / 0.0412
[55/100][150/169]	Loss_D: 0.2445	Loss_G: 4.8363	D(x): 0.9621	D(G(z)): 0.1678 / 0.0124
[56/100][0/169]	Loss_D: 0.2409	Loss_G: 3.2228	D(x): 0.8123	D(G(z)): 0.0149 / 0.0692
[56/100][50/169]	Loss_D: 2.9852	Loss_G: 0.5704	D(x): 0.0914	D(G(z)): 0.0002 / 0.6292
[56/100][100/169]	Loss_D: 0.1939	Loss_G: 4.2963	D(x): 0.9427	D(G(z)): 0.1138 / 0.0214
[56/100][150/169]	Loss_D: 0.2062	Loss_G: 4.2890	D(x): 0.9586	D(G(z)): 0.1410 / 0.0207
[57/100][0/169]	Loss_D: 0.1340	Loss_G: 4.2215	D(x): 0.9652	D(G(z)): 0.0855 / 0.0250
[57/100][50/169]	Loss_D: 0.2307	Loss_G: 3.5092	D(x): 0.8880	D(G(z)): 0.0920 / 0.0479
[57/100][100/169]	Loss_D: 0.1607	Loss_G: 4.1553	D(x): 0.9424	D(G(z)): 0.0900 / 0.0221
[57/100][150/169]	Loss_D: 0.5351	Loss_G: 2.4022	D(x): 0.7098	D(G(z)): 0.0892 / 0.1432
[58/100][0/169]	Loss_D: 0.4422	Loss_G: 3.7754	D(x): 0.6987	D(G(z)): 0.0238 / 0.0470
[58/100][50/169]	Loss_D: 0.1642	Loss_G: 4.1601	D(x): 0.9416	D(G(z)): 0.0897 / 0.0248
[58/100][100/169]	Loss_D: 0.1591	Loss_G: 4.0570	D(x): 0.9667	D(G(z)): 0.1087 / 0.0274
[58/100][150/169]	Loss_D: 0.1487	Loss_G: 3.8290	D(x): 0.9431	D(G(z)): 0.0796 / 0.0360
[59/100][0/169]	Loss_D: 1.1204	Loss_G: 1.2184	D(x): 0.4037	D(G(z)): 0.0013 / 0.3863
[59/100][50/169]	Loss_D: 0.2442	Loss_G: 2.9321	D(x): 0.8333	D(G(z)): 0.0416 / 0.0954
[59/100][100/169]	Loss_D: 0.1249	Loss_G: 3.7119	D(x): 0.9240	D(G(z)): 0.0400 / 0.0403
[59/100][150/169]	Loss_D: 1.6398	Loss_G: 1.5575	D(x): 0.3095	D(G(z)): 0.0331 / 0.2994
[60/100][0/169]	Loss_D: 0.2707	Loss_G: 4.8529	D(x): 0.9118	D(G(z)): 0.1429 / 0.0149
[60/100][50/169]	Loss_D: 0.1543	Loss_G: 3.8394	D(x): 0.9190	D(G(z)): 0.0603 / 0.0351
[60/100][100/169]	Loss_D: 0.2176	Loss_G: 2.5596	D(x): 0.8573	D(G(z)): 0.0451 / 0.1172
[60/100][150/169]	Loss_D: 0.1571	Loss_G: 5.1060	D(x): 0.9298	D(G(z)): 0.0711 / 0.0129
[61/100][0/169]	Loss_D: 0.1296	Loss_G: 4.2953	D(x): 0.9157	D(G(z)): 0.0345 / 0.0236
[61/100][50/169]	Loss_D: 0.1726	Loss_G: 4.1301	D(x): 0.8786	D(G(z)): 0.0273 / 0.0296
[61/100][100/169]	Loss_D: 0.1350	Loss_G: 4.9083	D(x): 0.9674	D(G(z)): 0.0882 / 0.0127
[61/100][150/169]	Loss_D: 0.1215	Loss_G: 4.2024	D(x): 0.9362	D(G(z)): 0.0477 / 0.0293
[62/100][0/169]	Loss_D: 0.3728	Loss_G: 1.5666	D(x): 0.7270	D(G(z)): 0.0068 / 0.3147
[62/100][50/169]	Loss_D: 0.3556	Loss_G: 3.1591	D(x): 0.7652	D(G(z)): 0.0334 / 0.0829
[62/100][100/169]	Loss_D: 0.2310	Loss_G: 3.0423	D(x): 0.8646	D(G(z)): 0.0671 / 0.0758
[62/100][150/169]	Loss_D: 0.4914	Loss_G: 3.0107	D(x): 0.9141	D(G(z)): 0.2525 / 0.0895
[63/100][0/169]	Loss_D: 0.2932	Loss_G: 4.1345	D(x): 0.8506	D(G(z)): 0.0940 / 0.0279
[63/100][50/169]	Loss_D: 0.1631	Loss_G: 3.5140	D(x): 0.9179	D(G(z)): 0.0668 / 0.0515
[63/100][100/169]	Loss_D: 0.1463	Loss_G: 4.4576	D(x): 0.9551	D(G(z)): 0.0872 / 0.0201
[63/100][150/169]	Loss_D: 0.1162	Loss_G: 3.9560	D(x): 0.9490	D(G(z)): 0.0580 / 0.0296
[64/100][0/169]	Loss_D: 0.6396	Loss_G: 2.3794	D(x): 0.5883	D(G(z)): 0.0073 / 0.1507
[64/100][50/169]	Loss_D: 0.1623	Loss_G: 3.5788	D(x): 0.9319	D(G(z)): 0.0793 / 0.0402
[64/100][100/169]	Loss_D: 0.1280	Loss_G: 3.9435	D(x): 0.9568	D(G(z)): 0.0753 / 0.0345
[64/100][150/169]	Loss_D: 0.1133	Loss_G: 3.8448	D(x): 0.9604	D(G(z)): 0.0658 / 0.0324
[65/100][0/169]	Loss_D: 0.7487	Loss_G: 5.1327	D(x): 0.7803	D(G(z)): 0.2900 / 0.0152
[65/100][50/169]	Loss_D: 0.5856	Loss_G: 6.7438	D(x): 0.9875	D(G(z)): 0.3676 / 0.0022
[65/100][100/169]	Loss_D: 0.2336	Loss_G: 2.4998	D(x): 0.8292	D(G(z)): 0.0260 / 0.1237
[65/100][150/169]	Loss_D: 0.1628	Loss_G: 3.2171	D(x): 0.9104	D(G(z)): 0.0567 / 0.0625
[66/100][0/169]	Loss_D: 1.3793	Loss_G: 5.2608	D(x): 0.9734	D(G(z)): 0.5491 / 0.0213
[66/100][50/169]	Loss_D: 0.1252	Loss_G: 4.0525	D(x): 0.9273	D(G(z)): 0.0412 / 0.0321
[66/100][100/169]	Loss_D: 0.1223	Loss_G: 4.7263	D(x): 0.9101	D(G(z)): 0.0225 / 0.0158
[66/100][150/169]	Loss_D: 0.1422	Loss_G: 3.6226	D(x): 0.9255	D(G(z)): 0.0573 / 0.0461
[67/100][0/169]	Loss_D: 0.2067	Loss_G: 5.0743	D(x): 0.9892	D(G(z)): 0.1613 / 0.0097
[67/100][50/169]	Loss_D: 0.7875	Loss_G: 5.3730	D(x): 0.8734	D(G(z)): 0.4046 / 0.0113
[67/100][100/169]	Loss_D: 0.1613	Loss_G: 4.0346	D(x): 0.9371	D(G(z)): 0.0824 / 0.0297
[67/100][150/169]	Loss_D: 0.1504	Loss_G: 4.6006	D(x): 0.9669	D(G(z)): 0.0966 / 0.0153
[68/100][0/169]	Loss_D: 0.1130	Loss_G: 4.0778	D(x): 0.9458	D(G(z)): 0.0493 / 0.0289
[68/100][50/169]	Loss_D: 0.1121	Loss_G: 4.3294	D(x): 0.9542	D(G(z)): 0.0584 / 0.0209
[68/100][100/169]	Loss_D: 0.3894	Loss_G: 7.0681	D(x): 0.9616	D(G(z)): 0.2551 / 0.0016
[68/100][150/169]	Loss_D: 0.1472	Loss_G: 4.3505	D(x): 0.9435	D(G(z)): 0.0788 / 0.0200
[69/100][0/169]	Loss_D: 0.1662	Loss_G: 4.6735	D(x): 0.9601	D(G(z)): 0.1065 / 0.0151
[69/100][50/169]	Loss_D: 0.1619	Loss_G: 3.0114	D(x): 0.9020	D(G(z)): 0.0449 / 0.0882
[69/100][100/169]	Loss_D: 0.1518	Loss_G: 3.5087	D(x): 0.8901	D(G(z)): 0.0228 / 0.0507
[69/100][150/169]	Loss_D: 0.1085	Loss_G: 3.0146	D(x): 0.9311	D(G(z)): 0.0301 / 0.0813
[70/100][0/169]	Loss_D: 0.1150	Loss_G: 5.5522	D(x): 0.9808	D(G(z)): 0.0840 / 0.0063
[70/100][50/169]	Loss_D: 0.8756	Loss_G: 2.7033	D(x): 0.8048	D(G(z)): 0.3756 / 0.1333
[70/100][100/169]	Loss_D: 0.6940	Loss_G: 6.4174	D(x): 0.9747	D(G(z)): 0.4260 / 0.0029
[70/100][150/169]	Loss_D: 0.1922	Loss_G: 2.9267	D(x): 0.8894	D(G(z)): 0.0565 / 0.0837
[71/100][0/169]	Loss_D: 0.1356	Loss_G: 3.6725	D(x): 0.9317	D(G(z)): 0.0557 / 0.0458
[71/100][50/169]	Loss_D: 0.7022	Loss_G: 7.0183	D(x): 0.9662	D(G(z)): 0.3953 / 0.0019
[71/100][100/169]	Loss_D: 0.1317	Loss_G: 4.1180	D(x): 0.9382	D(G(z)): 0.0584 / 0.0276
[71/100][150/169]	Loss_D: 0.1509	Loss_G: 3.2030	D(x): 0.9099	D(G(z)): 0.0463 / 0.0690
[72/100][0/169]	Loss_D: 0.1080	Loss_G: 3.9913	D(x): 0.9536	D(G(z)): 0.0550 / 0.0319
[72/100][50/169]	Loss_D: 0.0828	Loss_G: 4.6994	D(x): 0.9565	D(G(z)): 0.0350 / 0.0171
[72/100][100/169]	Loss_D: 0.1247	Loss_G: 3.7236	D(x): 0.9321	D(G(z)): 0.0477 / 0.0417
[72/100][150/169]	Loss_D: 1.1814	Loss_G: 2.9375	D(x): 0.8727	D(G(z)): 0.5237 / 0.1637
[73/100][0/169]	Loss_D: 0.2846	Loss_G: 4.5155	D(x): 0.9342	D(G(z)): 0.1562 / 0.0224
[73/100][50/169]	Loss_D: 0.3467	Loss_G: 3.2092	D(x): 0.7445	D(G(z)): 0.0066 / 0.0729
[73/100][100/169]	Loss_D: 0.2811	Loss_G: 2.6705	D(x): 0.7882	D(G(z)): 0.0113 / 0.1070
[73/100][150/169]	Loss_D: 0.1315	Loss_G: 4.1432	D(x): 0.9543	D(G(z)): 0.0744 / 0.0255
[74/100][0/169]	Loss_D: 0.1068	Loss_G: 4.3908	D(x): 0.9243	D(G(z)): 0.0228 / 0.0257
[74/100][50/169]	Loss_D: 1.3603	Loss_G: 0.8321	D(x): 0.3623	D(G(z)): 0.1151 / 0.5023
[74/100][100/169]	Loss_D: 0.5649	Loss_G: 5.1657	D(x): 0.9664	D(G(z)): 0.3262 / 0.0130
[74/100][150/169]	Loss_D: 0.3777	Loss_G: 2.4975	D(x): 0.8135	D(G(z)): 0.1114 / 0.1253
[75/100][0/169]	Loss_D: 0.2561	Loss_G: 3.3270	D(x): 0.8350	D(G(z)): 0.0439 / 0.0617
[75/100][50/169]	Loss_D: 0.1789	Loss_G: 4.5129	D(x): 0.9742	D(G(z)): 0.1263 / 0.0186
[75/100][100/169]	Loss_D: 0.1187	Loss_G: 3.9485	D(x): 0.9333	D(G(z)): 0.0443 / 0.0369
[75/100][150/169]	Loss_D: 0.1020	Loss_G: 4.6439	D(x): 0.9620	D(G(z)): 0.0568 / 0.0154
[76/100][0/169]	Loss_D: 0.0852	Loss_G: 3.7480	D(x): 0.9652	D(G(z)): 0.0462 / 0.0389
[76/100][50/169]	Loss_D: 0.1068	Loss_G: 3.4903	D(x): 0.9298	D(G(z)): 0.0298 / 0.0492
[76/100][100/169]	Loss_D: 0.1981	Loss_G: 4.5688	D(x): 0.8769	D(G(z)): 0.0498 / 0.0198
[76/100][150/169]	Loss_D: 0.1013	Loss_G: 4.1977	D(x): 0.9575	D(G(z)): 0.0499 / 0.0242
[77/100][0/169]	Loss_D: 0.1071	Loss_G: 5.1738	D(x): 0.9832	D(G(z)): 0.0783 / 0.0103
[77/100][50/169]	Loss_D: 0.1134	Loss_G: 4.6915	D(x): 0.9818	D(G(z)): 0.0852 / 0.0142
[77/100][100/169]	Loss_D: 0.1480	Loss_G: 2.9841	D(x): 0.8902	D(G(z)): 0.0222 / 0.0930
[77/100][150/169]	Loss_D: 0.2356	Loss_G: 4.0112	D(x): 0.8363	D(G(z)): 0.0298 / 0.0370
[78/100][0/169]	Loss_D: 0.2545	Loss_G: 5.5658	D(x): 0.9864	D(G(z)): 0.1858 / 0.0064
[78/100][50/169]	Loss_D: 0.0889	Loss_G: 4.7765	D(x): 0.9625	D(G(z)): 0.0458 / 0.0148
[78/100][100/169]	Loss_D: 0.1123	Loss_G: 4.9672	D(x): 0.9756	D(G(z)): 0.0782 / 0.0112
[78/100][150/169]	Loss_D: 0.2455	Loss_G: 2.8848	D(x): 0.8516	D(G(z)): 0.0634 / 0.1054
[79/100][0/169]	Loss_D: 0.1308	Loss_G: 4.4942	D(x): 0.9444	D(G(z)): 0.0652 / 0.0210
[79/100][50/169]	Loss_D: 0.1262	Loss_G: 4.5302	D(x): 0.9438	D(G(z)): 0.0556 / 0.0204
[79/100][100/169]	Loss_D: 1.0050	Loss_G: 5.8543	D(x): 0.9927	D(G(z)): 0.5516 / 0.0063
[79/100][150/169]	Loss_D: 0.1364	Loss_G: 4.7997	D(x): 0.9614	D(G(z)): 0.0824 / 0.0142
[80/100][0/169]	Loss_D: 0.0733	Loss_G: 4.5421	D(x): 0.9626	D(G(z)): 0.0325 / 0.0194
[80/100][50/169]	Loss_D: 0.0998	Loss_G: 4.6205	D(x): 0.9688	D(G(z)): 0.0608 / 0.0166
[80/100][100/169]	Loss_D: 0.0933	Loss_G: 4.5068	D(x): 0.9523	D(G(z)): 0.0399 / 0.0235
[80/100][150/169]	Loss_D: 0.0681	Loss_G: 3.8369	D(x): 0.9613	D(G(z)): 0.0265 / 0.0336
[81/100][0/169]	Loss_D: 0.0733	Loss_G: 4.4037	D(x): 0.9606	D(G(z)): 0.0307 / 0.0212
[81/100][50/169]	Loss_D: 0.1383	Loss_G: 5.9482	D(x): 0.9899	D(G(z)): 0.1106 / 0.0040
[81/100][100/169]	Loss_D: 0.0801	Loss_G: 4.0939	D(x): 0.9539	D(G(z)): 0.0303 / 0.0281
[81/100][150/169]	Loss_D: 0.7371	Loss_G: 3.6867	D(x): 0.7868	D(G(z)): 0.3250 / 0.0437
[82/100][0/169]	Loss_D: 0.5586	Loss_G: 2.0757	D(x): 0.6593	D(G(z)): 0.0485 / 0.2007
[82/100][50/169]	Loss_D: 0.1631	Loss_G: 4.0661	D(x): 0.9130	D(G(z)): 0.0593 / 0.0299
[82/100][100/169]	Loss_D: 0.0717	Loss_G: 3.5487	D(x): 0.9565	D(G(z)): 0.0249 / 0.0447
[82/100][150/169]	Loss_D: 0.9085	Loss_G: 2.9359	D(x): 0.4948	D(G(z)): 0.0014 / 0.1177
[83/100][0/169]	Loss_D: 0.2877	Loss_G: 3.7398	D(x): 0.7913	D(G(z)): 0.0110 / 0.0529
[83/100][50/169]	Loss_D: 0.1176	Loss_G: 3.6024	D(x): 0.9148	D(G(z)): 0.0223 / 0.0523
[83/100][100/169]	Loss_D: 0.0779	Loss_G: 4.7178	D(x): 0.9570	D(G(z)): 0.0306 / 0.0175
[83/100][150/169]	Loss_D: 0.6479	Loss_G: 7.2867	D(x): 0.9738	D(G(z)): 0.3773 / 0.0015
[84/100][0/169]	Loss_D: 0.1472	Loss_G: 3.6840	D(x): 0.9071	D(G(z)): 0.0327 / 0.0547
[84/100][50/169]	Loss_D: 0.0989	Loss_G: 4.5749	D(x): 0.9453	D(G(z)): 0.0370 / 0.0207
[84/100][100/169]	Loss_D: 0.2727	Loss_G: 3.5910	D(x): 0.8287	D(G(z)): 0.0487 / 0.0557
[84/100][150/169]	Loss_D: 0.1316	Loss_G: 4.2656	D(x): 0.9529	D(G(z)): 0.0688 / 0.0262
[85/100][0/169]	Loss_D: 0.1230	Loss_G: 4.0019	D(x): 0.9229	D(G(z)): 0.0353 / 0.0336
[85/100][50/169]	Loss_D: 1.7938	Loss_G: 2.4499	D(x): 0.7086	D(G(z)): 0.4425 / 0.2499
[85/100][100/169]	Loss_D: 0.1308	Loss_G: 4.2064	D(x): 0.9076	D(G(z)): 0.0219 / 0.0323
[85/100][150/169]	Loss_D: 0.6944	Loss_G: 9.1373	D(x): 0.9587	D(G(z)): 0.4060 / 0.0003
[86/100][0/169]	Loss_D: 0.4704	Loss_G: 3.9720	D(x): 0.7936	D(G(z)): 0.1194 / 0.0392
[86/100][50/169]	Loss_D: 0.1262	Loss_G: 4.6733	D(x): 0.9379	D(G(z)): 0.0528 / 0.0193
[86/100][100/169]	Loss_D: 0.1219	Loss_G: 4.7700	D(x): 0.9576	D(G(z)): 0.0696 / 0.0158
[86/100][150/169]	Loss_D: 0.0787	Loss_G: 4.5747	D(x): 0.9533	D(G(z)): 0.0267 / 0.0198
[87/100][0/169]	Loss_D: 0.7077	Loss_G: 0.6181	D(x): 0.5890	D(G(z)): 0.0329 / 0.6057
[87/100][50/169]	Loss_D: 0.1620	Loss_G: 5.0224	D(x): 0.9323	D(G(z)): 0.0731 / 0.0142
[87/100][100/169]	Loss_D: 0.1080	Loss_G: 5.2903	D(x): 0.9843	D(G(z)): 0.0834 / 0.0081
[87/100][150/169]	Loss_D: 2.3669	Loss_G: 11.6086	D(x): 0.9999	D(G(z)): 0.8286 / 0.0000
[88/100][0/169]	Loss_D: 0.1755	Loss_G: 3.9000	D(x): 0.9244	D(G(z)): 0.0760 / 0.0368
[88/100][50/169]	Loss_D: 0.0992	Loss_G: 4.8500	D(x): 0.9861	D(G(z)): 0.0727 / 0.0118
[88/100][100/169]	Loss_D: 0.0752	Loss_G: 5.1174	D(x): 0.9873	D(G(z)): 0.0570 / 0.0112
[88/100][150/169]	Loss_D: 0.6381	Loss_G: 1.1367	D(x): 0.5813	D(G(z)): 0.0015 / 0.4184
[89/100][0/169]	Loss_D: 0.4318	Loss_G: 5.8757	D(x): 0.9877	D(G(z)): 0.2727 / 0.0056
[89/100][50/169]	Loss_D: 0.1354	Loss_G: 5.0698	D(x): 0.9038	D(G(z)): 0.0227 / 0.0141
[89/100][100/169]	Loss_D: 0.1078	Loss_G: 4.4858	D(x): 0.9375	D(G(z)): 0.0356 / 0.0227
[89/100][150/169]	Loss_D: 0.0751	Loss_G: 4.3166	D(x): 0.9651	D(G(z)): 0.0354 / 0.0265
[90/100][0/169]	Loss_D: 0.0769	Loss_G: 5.1583	D(x): 0.9856	D(G(z)): 0.0579 / 0.0097
[90/100][50/169]	Loss_D: 0.0573	Loss_G: 4.6822	D(x): 0.9741	D(G(z)): 0.0293 / 0.0165
[90/100][100/169]	Loss_D: 0.0599	Loss_G: 4.6820	D(x): 0.9690	D(G(z)): 0.0264 / 0.0186
[90/100][150/169]	Loss_D: 0.0698	Loss_G: 4.4135	D(x): 0.9620	D(G(z)): 0.0287 / 0.0248
[91/100][0/169]	Loss_D: 0.0586	Loss_G: 4.1484	D(x): 0.9727	D(G(z)): 0.0292 / 0.0276
[91/100][50/169]	Loss_D: 0.0978	Loss_G: 5.0343	D(x): 0.9844	D(G(z)): 0.0721 / 0.0110
[91/100][100/169]	Loss_D: 0.0792	Loss_G: 3.7467	D(x): 0.9525	D(G(z)): 0.0276 / 0.0435
[91/100][150/169]	Loss_D: 0.0910	Loss_G: 4.4805	D(x): 0.9483	D(G(z)): 0.0331 / 0.0233
[92/100][0/169]	Loss_D: 0.1090	Loss_G: 3.8887	D(x): 0.9190	D(G(z)): 0.0175 / 0.0462
[92/100][50/169]	Loss_D: 0.0938	Loss_G: 4.9522	D(x): 0.9224	D(G(z)): 0.0091 / 0.0181
[92/100][100/169]	Loss_D: 0.3368	Loss_G: 4.0747	D(x): 0.8613	D(G(z)): 0.1149 / 0.0338
[92/100][150/169]	Loss_D: 0.1170	Loss_G: 4.8768	D(x): 0.9240	D(G(z)): 0.0306 / 0.0175
[93/100][0/169]	Loss_D: 0.2669	Loss_G: 2.7305	D(x): 0.8575	D(G(z)): 0.0833 / 0.0894
[93/100][50/169]	Loss_D: 0.1041	Loss_G: 4.6730	D(x): 0.9472	D(G(z)): 0.0417 / 0.0169
[93/100][100/169]	Loss_D: 0.0832	Loss_G: 4.5910	D(x): 0.9711	D(G(z)): 0.0489 / 0.0179
[93/100][150/169]	Loss_D: 0.2700	Loss_G: 4.8994	D(x): 0.9669	D(G(z)): 0.1777 / 0.0144
[94/100][0/169]	Loss_D: 0.2351	Loss_G: 6.7038	D(x): 0.9831	D(G(z)): 0.1702 / 0.0020
[94/100][50/169]	Loss_D: 0.0747	Loss_G: 4.2847	D(x): 0.9615	D(G(z)): 0.0327 / 0.0255
[94/100][100/169]	Loss_D: 0.0565	Loss_G: 5.0452	D(x): 0.9821	D(G(z)): 0.0356 / 0.0123
[94/100][150/169]	Loss_D: 0.2861	Loss_G: 3.4364	D(x): 0.9312	D(G(z)): 0.1689 / 0.0530
[95/100][0/169]	Loss_D: 0.2796	Loss_G: 6.9961	D(x): 0.9600	D(G(z)): 0.1802 / 0.0018
[95/100][50/169]	Loss_D: 0.1205	Loss_G: 3.7514	D(x): 0.9159	D(G(z)): 0.0212 / 0.0426
[95/100][100/169]	Loss_D: 0.0804	Loss_G: 3.9747	D(x): 0.9385	D(G(z)): 0.0147 / 0.0329
[95/100][150/169]	Loss_D: 0.1374	Loss_G: 4.2748	D(x): 0.9134	D(G(z)): 0.0378 / 0.0274
[96/100][0/169]	Loss_D: 0.0562	Loss_G: 5.1368	D(x): 0.9757	D(G(z)): 0.0300 / 0.0107
[96/100][50/169]	Loss_D: 0.1323	Loss_G: 6.5426	D(x): 0.9873	D(G(z)): 0.1036 / 0.0026
[96/100][100/169]	Loss_D: 0.0607	Loss_G: 4.4010	D(x): 0.9712	D(G(z)): 0.0297 / 0.0216
[96/100][150/169]	Loss_D: 0.0968	Loss_G: 4.1614	D(x): 0.9382	D(G(z)): 0.0284 / 0.0307
[97/100][0/169]	Loss_D: 0.0706	Loss_G: 5.0230	D(x): 0.9845	D(G(z)): 0.0498 / 0.0122
[97/100][50/169]	Loss_D: 0.2281	Loss_G: 7.9151	D(x): 0.9977	D(G(z)): 0.1765 / 0.0006
[97/100][100/169]	Loss_D: 0.0873	Loss_G: 3.7759	D(x): 0.9320	D(G(z)): 0.0129 / 0.0428
[97/100][150/169]	Loss_D: 0.0559	Loss_G: 5.1218	D(x): 0.9599	D(G(z)): 0.0132 / 0.0133
[98/100][0/169]	Loss_D: 3.9742	Loss_G: 9.0048	D(x): 0.9999	D(G(z)): 0.9433 / 0.0004
[98/100][50/169]	Loss_D: 0.2032	Loss_G: 5.4107	D(x): 0.9414	D(G(z)): 0.1056 / 0.0094
[98/100][100/169]	Loss_D: 0.2060	Loss_G: 3.4595	D(x): 0.8807	D(G(z)): 0.0470 / 0.0649
[98/100][150/169]	Loss_D: 0.1076	Loss_G: 3.8570	D(x): 0.9407	D(G(z)): 0.0405 / 0.0401
[99/100][0/169]	Loss_D: 0.0824	Loss_G: 5.4666	D(x): 0.9822	D(G(z)): 0.0556 / 0.0088
[99/100][50/169]	Loss_D: 0.1015	Loss_G: 3.5747	D(x): 0.9177	D(G(z)): 0.0104 / 0.0517
[99/100][100/169]	Loss_D: 0.0596	Loss_G: 4.4748	D(x): 0.9656	D(G(z)): 0.0227 / 0.0228
[99/100][150/169]	Loss_D: 0.1584	Loss_G: 4.3108	D(x): 0.8964	D(G(z)): 0.0248 / 0.0274

Results

Finally, lets check out how we did. Here, we will look at three different results. First, we will see how D and G’s losses changed during training. Second, we will visualize G’s output on the fixed_noise batch for every epoch. And third, we will look at a batch of real data next to a batch of fake data from G.

Loss versus training iteration

Below is a plot of D & G’s losses versus training iterations.

In [ ]:
plt.figure(figsize=(10, 5))
plt.title('Generator and Discriminator Loss During Training')
plt.plot(G_losses, label='G')
plt.plot(D_losses, label='D')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.legend()
plt.show()

Visualization of G’s progression

Remember how we saved the generator’s output on the fixed_noise batch after every epoch of training. Now, we can visualize the training progression of G with an animation. Press the play button to start the animation.

In [ ]:
#%%capture
fig = plt.figure(figsize=(8,8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)

HTML(ani.to_jshtml())
Animation size has reached 21214755 bytes, exceeding the limit of 20971520.0. If you're sure you want a larger animation embedded, set the animation.embed_limit rc parameter to a larger value (in MB). This and further frames will be dropped.
Out[ ]:

Real Images vs. Fake Images

Finally, lets take a look at some real images and fake images side by side.

In [ ]:
# Grab a batch of real images from the dataloader
real_batch = next(iter(dataloader))

# Plot the real images
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(),(1,2,0)))

# Plot the fake images from the last epoch
plt.subplot(1,2,2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1],(1,2,0)))
plt.show()
In [ ]: